%load_ext autoreload
%autoreload 2
%pylab inline
Populating the interactive namespace from numpy and matplotlib
%reset_selective -f .*(?<!_)$
from custom_imports import *
from pathos.multiprocessing import ProcessingPool, ThreadingPool
#p = ProcessingPool(30)
# Load the pre-computed pickles produced by earlier cleaning steps.
masked_images_ = (joblib.load('cleaned_muscles.pkl'))
raw_images_ = (joblib.load('raw_images.pkl'))
original_images_ = (joblib.load('images.pkl'))
# Keep only the samples whose indices appear in `A`.
# NOTE(review): `A` is not defined in this file — presumably it comes from
# `custom_imports`; confirm it is in scope before running this cell.
masked_images_ = [masked_images_[i] for i in A]
raw_images_ = [raw_images_[i] for i in A]
original_images_ = [original_images_[i] for i in A]
#masked_images = bag.from_sequence(_masked_images)
#raw_images = bag.from_sequence(_raw_images)
#original_images = bag.from_sequence(_original_images)
#del _masked_images
#del _raw_images
#del _original_images
plt.imshow(_masked_images[0])
<matplotlib.image.AxesImage at 0x719e5260b358>
img_indx = np.random.randint(len(_raw_images))
scnd_img_to_drw = get_torso_outline((_original_images[img_indx], _masked_images[img_indx]-1))
if scnd_img_to_drw is None:
print("is None")
else:
plt.imshow(scnd_img_to_drw)
((1400, 1991, 3), (1400, 1991))
plt.imshow(_masked_images[img_indx])
plt.show()
plt.imshow(_raw_images[img_indx])
plt.show()
plt.imshow(scnd_img_to_drw)
<matplotlib.image.AxesImage at 0x719e52415908>
%reset_selective -f .*(?<!_)$
from custom_imports import *
#PARAMS
dsize = resize_width, resize_height = 300, 300
neighSize = 41
STEP = 2
def gray_scale(x):
    """Convert an image to grayscale by averaging its channels.

    Parameters
    ----------
    x : np.ndarray
        Either an (H, W, C) color image or an already-grayscale (H, W) array.

    Returns
    -------
    np.ndarray
        The (H, W) grayscale image.  FIX: the original function had no
        fallback branch, so 2-D inputs silently returned ``None``; such
        inputs are now passed through unchanged.
    """
    if len(x.shape) > 2:
        # Unweighted average over the channel axis.
        return x.mean(axis=2)
    # Already single-channel: return as-is instead of falling off the end.
    return x
def min_1(x):
    """Return *x* shifted down by one (e.g. maps {1, 2} mask labels to {0, 1})."""
    shifted = x - 1
    return shifted
# Preprocess in parallel: grayscale the raw images, shift mask labels to start
# at 0, keep only pairs whose shape ratios pass, then compute torso outlines.
# NOTE(review): the pool `p` is commented out near the top of the file — it
# must be created (ProcessingPool) before these calls work.
x = p.map(gray_scale, _raw_images)
y = p.map(min_1, _masked_images)#.map(mark_margins, n_size=neighSize)
cond = p.map(get_shape_ratios, zip(x, y))
x = [i for i, c in zip(x, cond) if c]
y = [i for i, c in zip(y, cond) if c]
z = p.map(get_torso_outline, zip(x, y))
def mardas(image):
    """Resize *image* to the module-level `dsize` via `resize_image`."""
    # NOTE(review): relies on `dsize` and `resize_image` being in scope
    # (both come from earlier cells / custom_imports).
    resized = resize_image(image, dsize=dsize)
    return resized
# Resize images/masks/outlines to `dsize`, normalize the grayscale images, and
# bundle (index, image, mask, outline) tuples, dropping samples for which the
# torso outline could not be computed (z is None).  Result is persisted.
rszd_x = map(mardas, x)
rszd_x = list(rszd_x)
rszd_y = list(map(mardas, y))
rszd_z = list(map(lambda i: None if i is None else resize_image(i, dsize=dsize), z))
nrmlzd_rszd_x = list(map(normalize_image, rszd_x))
IXYZ = list((i, x, y, z) for i, x, y, z in zip(A, nrmlzd_rszd_x, rszd_y, rszd_z) if z is not None)
joblib.dump(IXYZ, 'IXYZ.pkl')
%reset_selective -f .*(?<!_)$
from custom_imports import *
IXYZ = joblib.load('IXYZ.pkl')
i = np.random.randint(len(IXYZ))
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
axes = axes.reshape(-1)
for i, x in enumerate(IXYZ[i][1:]):
axes[i].imshow(x)
#preload = xy_.compute()
At this point we simply take random samples. In the future this can be changed to control what proportion of each label is picked — that is, by assigning a weight to each label.
It is important to note that, in order to make the random kernels truly random, we need to explicitly pass a random seed to each of them. Otherwise all kernels (i.e., `get_sample` below) share the same random state, leading to identical results across different runs.
XY is saved in ./cleaned_data.pkl.
%reset_selective -f .*(?<!_)$
%load_ext autoreload
%autoreload 2
from custom_imports import *
def get_neighbors_masked(ixyz, neigh_size):
    """Extract square pixel neighborhoods around every nonzero mask location.

    Parameters
    ----------
    ixyz : tuple
        (index, image, labels, mask): `image` is the 2-D normalized input,
        `labels` holds per-pixel targets, and `mask` selects which pixels
        to sample (nonzero entries).
    neigh_size : int
        Window width; each patch is (2*(neigh_size//2), 2*(neigh_size//2)).

    Returns
    -------
    (np.ndarray, np.ndarray)
        Stacked patches X and the corresponding center-pixel labels Y.
        Windows that would cross the image border are skipped (their slice
        comes back with the wrong shape).

    Fixes vs. original: removed the leftover debug ``print(m)`` (it flooded
    notebook output when run inside the process pool), removed the unused
    ``w, h`` unpacking, and stopped the loop index from shadowing the
    sample index unpacked from *ixyz*.
    """
    half = neigh_size // 2
    _, image, labels, mask = ixyz
    # Per-image standardization.  TODO(review): consider normalizing once
    # upstream instead of inside the sampler.
    image = (image - image.mean()) / image.std()
    patches, targets = [], []
    for r, c in zip(*np.where(mask)):
        window = image[r - half:r + half, c - half:c + half]
        # Border windows yield a short (or, via negative start indices,
        # empty) slice — drop them rather than padding.
        if window.shape == (2 * half, 2 * half):
            patches.append(window)
            targets.append(labels[r, c])
    return np.array(patches), np.array(targets)
#PARAMS
dsize = resize_width, resize_height = 300, 300
neighSize = 41  # neighborhood window width in pixels
STEP = 2
# Extract training patches from each (index, image, mask, outline) tuple in
# parallel and stack them into one big dataset (X patches, Y labels).
p = ProcessingPool(30)
IXYZ = joblib.load("IXYZ.pkl")
mardas = p.map(lambda x: get_neighbors_masked(x, neigh_size=neighSize), IXYZ)
X = np.concatenate([x[0] for x in mardas])
Y = np.concatenate([x[1] for x in mardas])
The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload
X.shape, Y.shape
plt.imshow(X[156])
(11507474,)
# Keras simple deep model:
def build_v02(width, height, depth, reg, init="he_normal"):
    """Build the v02 patch classifier: four conv layers plus a dense head.

    Parameters
    ----------
    width, height, depth : int
        Input patch dimensions (channels-last unless the backend is
        configured channels-first).
    reg : keras regularizer
        Currently unused — regularization experiments are disabled; the
        parameter is kept for signature compatibility with other builders.
    init : str
        Kernel initializer name.

    Returns
    -------
    Sequential
        Uncompiled model ending in a single sigmoid unit (binary output).
    """
    model = Sequential()
    inputShape = (height, width, depth)
    # chanDim is kept for the (currently disabled) BatchNormalization layers.
    chanDim = -1
    # Honor a "channels first" backend configuration.
    if K.image_data_format() == "channels_first":
        inputShape = (depth, height, width)
        chanDim = 1
    # FIX: the original never passed `input_shape` to the first layer (the
    # only layer that received it was commented out), so the channels_first
    # handling above was dead code and the model could not be built or
    # summarized before seeing data.
    model.add(Conv2D(32, (10, 10), padding="same",
                     kernel_initializer=init, input_shape=inputShape))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (5, 5), strides=(1, 1), padding="same",
                     kernel_initializer=init))
    model.add(Activation("relu"))
    model.add(Dropout(0.25))
    # Funnel down to a single feature map before the dense head.
    model.add(Conv2D(10, (3, 3), strides=(1, 1), padding="same",
                     kernel_initializer=init))
    model.add(Activation("relu"))
    model.add(Conv2D(1, (3, 3), strides=(1, 1), padding="same",
                     kernel_initializer=init))
    model.add(Activation("relu"))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Dense(10))
    model.add(Dense(1))
    model.add(Activation("sigmoid"))
    return model
# Keras simple deep model:
def build_v03(width, height, depth, reg, init="he_normal"):
    """Build the v03 fully-convolutional variant of the patch model.

    Unlike build_v02, activations after the early conv layers and the dense
    head are disabled (commented out), so the network is conv-only and ends
    in a per-pixel sigmoid.  `reg` is currently unused (regularization
    experiments are disabled) but kept for signature compatibility.
    """
    model = Sequential()
    inputShape = (height, width, depth)
    chanDim = -1
    # If we are using "channels first", update the input shape
    # and channels dimension.
    if K.image_data_format() == "channels_first":
        inputShape = (depth, height, width)
        chanDim = 1
    # Two large-receptive-field conv layers open the network.
    model.add(Conv2D(32, (30, 30), strides=(1, 1), padding="same",
        kernel_initializer=init,# kernel_regularizer=reg,
        input_shape=inputShape))
    # NOTE(review): `input_shape` on this second layer is redundant — only
    # the first layer needs it.
    model.add(Conv2D(32, (15, 15), strides=(1, 1), padding="same",
        kernel_initializer=init,# kernel_regularizer=reg,
        input_shape=inputShape))
    # Two more conv layers (activations intentionally disabled below).
    model.add(Conv2D(32, (10, 10), padding="same",
        kernel_initializer=init, #kernel_regularizer=reg)
    ))
    #model.add(Activation("relu"))
    #model.add(BatchNormalization(axis=chanDim))
    model.add(Conv2D(32, (5, 5), strides=(1, 1), padding="same",
        kernel_initializer=init, #kernel_regularizer=reg)
    ))
    #model.add(Activation("relu"))
    #model.add(BatchNormalization(axis=chanDim))
    #model.add(Dropout(0.25))
    # Disabled experiment: two more CONV layers with 64 filters each.
    #model.add(Conv2D(64, (3, 3), padding="same",
    #    kernel_initializer=init, kernel_regularizer=reg))
    #model.add(Activation("relu"))
    ##model.add(BatchNormalization(axis=chanDim))
    #model.add(Conv2D(64, (3, 3), strides=(1, 1), padding="same",
    #    kernel_initializer=init, kernel_regularizer=reg))
    #model.add(Activation("relu"))
    ##model.add(BatchNormalization(axis=chanDim))
    #model.add(Dropout(0.25))
    #
    ## Disabled experiment: increase the number of filters to 128.
    #model.add(Conv2D(128, (3, 3), padding="same",
    #    kernel_initializer=init, kernel_regularizer=reg))
    #model.add(Activation("relu"))
    ##model.add(BatchNormalization(axis=chanDim))
    # Funnel down to a single feature map; final sigmoid is per-pixel.
    model.add(Conv2D(10, (3, 3), strides=(1, 1), padding="same",
        kernel_initializer=init, #kernel_regularizer=reg
        )
    )
    model.add(Activation("relu"))
    model.add(Conv2D(1, (3, 3), strides=(1, 1), padding="same",
        kernel_initializer=init, #kernel_regularizer=reg)
    ))
    #model.add(Activation("relu"))
    #model.add(Flatten())
    #model.add(Dense(100))
    #model.add(Dense(10))
    #model.add(Dense(1))
    model.add(Activation("sigmoid"))
    #model.add(BatchNormalization(axis=chanDim))
    return model
*_, m, n = X.shape  # patch height/width
#if len(_) == 1:
#    X = X.reshape(-1, m, n, 1)
#    Y = Y.reshape(-1, 1)
# Add a trailing channel axis, binarize labels (==1), hold out 20% for test.
Xtr, Xts, Ytr, Yts = train_test_split(X.reshape(*X.shape, 1), Y.reshape(*Y.shape, 1)==1, test_size=.2)
# Creating and starting keras model example:
opt = Adam(lr=1e-4)#, decay=1e-4 / epochs)
#v02
#model = build(width=neighSize, height=neighSize, depth=1, reg=l2(0.0005))
model = build_v02(width=m, height=n, depth=1, reg=l2(0.0005))
model.compile(loss=losses.binary_crossentropy, optimizer=opt, metrics=["accuracy"])
--------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-50-4152538a361a> in <module> 4 #v02 5 #model = build(width=neighSize, height=neighSize, depth=1, reg=l2(0.0005)) ----> 6 model = build_v02(width=m, height=n, depth=1, reg=l2(0.0005)) 7 model.compile(loss=losses.binary_crossentropy, optimizer=opt, metrics=["accuracy"]) NameError: name 'build_v02' is not defined
1 - (Ytr.sum()/(Ytr>-1).sum())
0.6613350953766025
Xtr.shape
(9205979, 40, 40, 1)
#model.fit(X, Y, batch_size=1)
# verion02
#model.fit_generator(gen(Xtr, Ytr, 10, neighSize), 1000, epochs=1000)#, validation_data=gen(Xts, Yts, 10, neighSize))
model.fit(Xtr, Ytr, batch_size=800, epochs=500)#, validation_data=gen(Xts, Yts, 10, neighSize))
Epoch 1/500 9205979/9205979 [==============================] - 3418s 371us/step - loss: 0.1841 - acc: 0.9273 Epoch 2/500 4750400/9205979 [==============>...............] - ETA: 27:30 - loss: 0.1428 - acc: 0.9442
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
9205979/9205979 [==============================] - 3416s 371us/step - loss: 0.1381 - acc: 0.9461 Epoch 3/500 156000/9205979 [..............................] - ETA: 57:26 - loss: 0.1288 - acc: 0.9497
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
4910400/9205979 [===============>..............] - ETA: 26:36 - loss: 0.1263 - acc: 0.9507
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
9205979/9205979 [==============================] - 3416s 371us/step - loss: 0.1239 - acc: 0.9516 Epoch 4/500 3279200/9205979 [=========>....................] - ETA: 36:36 - loss: 0.1177 - acc: 0.9541
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
9205979/9205979 [==============================] - 3411s 371us/step - loss: 0.1156 - acc: 0.9548 Epoch 5/500 1606400/9205979 [====>.........................] - ETA: 46:56 - loss: 0.1116 - acc: 0.9564
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
6348800/9205979 [===================>..........] - ETA: 17:38 - loss: 0.1105 - acc: 0.9567
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
9205979/9205979 [==============================] - 3408s 370us/step - loss: 0.1098 - acc: 0.9570 Epoch 6/500 1877600/9205979 [=====>........................] - ETA: 45:14 - loss: 0.1066 - acc: 0.9584
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
6419200/9205979 [===================>..........] - ETA: 17:11 - loss: 0.1060 - acc: 0.9586
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
9205979/9205979 [==============================] - 3407s 370us/step - loss: 0.1056 - acc: 0.9587 Epoch 7/500 1952800/9205979 [=====>........................] - ETA: 44:45 - loss: 0.1032 - acc: 0.9595
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
6703200/9205979 [====================>.........] - ETA: 15:26 - loss: 0.1027 - acc: 0.9598
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
9205979/9205979 [==============================] - 3406s 370us/step - loss: 0.1024 - acc: 0.9599 Epoch 8/500 2244800/9205979 [======>.......................] - ETA: 42:56 - loss: 0.1005 - acc: 0.9606
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
6988800/9205979 [=====================>........] - ETA: 13:40 - loss: 0.1000 - acc: 0.9608
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
2528800/9205979 [=======>......................] - ETA: 41:11 - loss: 0.0984 - acc: 0.9614
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
7011200/9205979 [=====================>........] - ETA: 13:31 - loss: 0.0978 - acc: 0.9617
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
2546400/9205979 [=======>......................] - ETA: 41:05 - loss: 0.0962 - acc: 0.9622
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
7284000/9205979 [======================>.......] - ETA: 11:51 - loss: 0.0959 - acc: 0.9624
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
2788800/9205979 [========>.....................] - ETA: 39:35 - loss: 0.0945 - acc: 0.9629
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
7203200/9205979 [======================>.......] - ETA: 12:20 - loss: 0.0944 - acc: 0.9629
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
2708800/9205979 [=======>......................] - ETA: 40:03 - loss: 0.0933 - acc: 0.9633
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
7416800/9205979 [=======================>......] - ETA: 11:01 - loss: 0.0931 - acc: 0.9634
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
2923200/9205979 [========>.....................] - ETA: 38:44 - loss: 0.0921 - acc: 0.9637
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
7633600/9205979 [=======================>......] - ETA: 9:41 - loss: 0.0918 - acc: 0.9638
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
3136800/9205979 [=========>....................] - ETA: 37:25 - loss: 0.0908 - acc: 0.9643
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
7867200/9205979 [========================>.....] - ETA: 8:15 - loss: 0.0907 - acc: 0.9643
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
3401600/9205979 [==========>...................] - ETA: 35:46 - loss: 0.0900 - acc: 0.9645
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
8112800/9205979 [=========================>....] - ETA: 6:44 - loss: 0.0898 - acc: 0.9646
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
3618400/9205979 [==========>...................] - ETA: 34:26 - loss: 0.0891 - acc: 0.9648
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
9205979/9205979 [==============================] - 3404s 370us/step - loss: 0.0889 - acc: 0.9649 Epoch 17/500 9205979/9205979 [==============================] - 3405s 370us/step - loss: 0.0880 - acc: 0.9653 Epoch 18/500 9205979/9205979 [==============================] - 3403s 370us/step - loss: 0.0873 - acc: 0.9655 Epoch 19/500 2898400/9205979 [========>.....................] - ETA: 38:52 - loss: 0.0867 - acc: 0.9657
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
7619200/9205979 [=======================>......] - ETA: 9:46 - loss: 0.0866 - acc: 0.9657
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
3031200/9205979 [========>.....................] - ETA: 38:03 - loss: 0.0862 - acc: 0.9659
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
7768000/9205979 [========================>.....] - ETA: 8:51 - loss: 0.0860 - acc: 0.9659
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
3169600/9205979 [=========>....................] - ETA: 37:12 - loss: 0.0853 - acc: 0.9662
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
7913600/9205979 [========================>.....] - ETA: 7:57 - loss: 0.0854 - acc: 0.9662
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
3447200/9205979 [==========>...................] - ETA: 35:29 - loss: 0.0849 - acc: 0.9664
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
8185600/9205979 [=========================>....] - ETA: 6:17 - loss: 0.0849 - acc: 0.9664
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
3718400/9205979 [===========>..................] - ETA: 33:49 - loss: 0.0844 - acc: 0.9666
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
3903200/9205979 [===========>..................] - ETA: 32:41 - loss: 0.0844 - acc: 0.9666
--------------------------------------------------------------------------- KeyboardInterrupt Traceback (most recent call last) <ipython-input-89-48847dc380c3> in <module> 2 # verion02 3 #model.fit_generator(gen(Xtr, Ytr, 10, neighSize), 1000, epochs=1000)#, validation_data=gen(Xts, Yts, 10, neighSize)) ----> 4 model.fit(Xtr, Ytr, batch_size=800, epochs=500)#, validation_data=gen(Xts, Yts, 10, neighSize)) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs) 1035 initial_epoch=initial_epoch, 1036 steps_per_epoch=steps_per_epoch, -> 1037 validation_steps=validation_steps) 1038 1039 def evaluate(self, x=None, y=None, ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/keras/engine/training_arrays.py in fit_loop(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps) 197 ins_batch[i] = ins_batch[i].toarray() 198 --> 199 outs = f(ins_batch) 200 outs = to_list(outs) 201 for l, o in zip(out_labels, outs): ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs) 2670 'In order to feed symbolic tensors to a Keras model ' 2671 'in TensorFlow, you need tensorflow 1.8 or higher.') -> 2672 return self._legacy_call(inputs) 2673 2674 ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in _legacy_call(self, inputs) 2652 session = get_session() 2653 updated = session.run(fetches=fetches, feed_dict=feed_dict, -> 2654 **self.session_kwargs) 2655 return updated[:len(self.outputs)] 2656 ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata) 787 try: 788 result = 
self._run(None, fetches, feed_dict, options_ptr, --> 789 run_metadata_ptr) 790 if run_metadata: 791 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata) 995 if final_fetches or final_targets: 996 results = self._do_run(handle, final_targets, final_fetches, --> 997 feed_dict_string, options, run_metadata) 998 else: 999 results = [] ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata) 1130 if handle is None: 1131 return self._do_call(_run_fn, self._session, feed_dict, fetch_list, -> 1132 target_list, options, run_metadata) 1133 else: 1134 return self._do_call(_prun_fn, self._session, handle, feed_dict, ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args) 1137 def _do_call(self, fn, *args): 1138 try: -> 1139 return fn(*args) 1140 except errors.OpError as e: 1141 message = compat.as_text(e.message) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata) 1119 return tf_session.TF_Run(session, options, 1120 feed_dict, fetch_list, target_list, -> 1121 status, run_metadata) 1122 1123 def _prun_fn(session, handle, feed_dict, fetch_list): KeyboardInterrupt:
import datetime, re
d = re.sub(r':| |\.', '_', f"{datetime.datetime.now()}")
model.save(f'best_so_far_neighbor_{neighSize}_{d}.model_v03')
ls *v03
best_so_far_neighbor_17_2020-04-13_17_23_17_464782.model_v03 best_so_far_neighbor_41_2020-04-14_14_40_34_896919.model_v03
from keras.models import load_model
model = load_model(f'best_so_far_neighbor_41_2020-04-14_14_40_34_896919.model_v03')
Using TensorFlow backend.
from functools import reduce
def predict_image(x, model, neigh_size):
    """Run the patch model densely over image *x* and return a probability map.

    Parameters
    ----------
    x : np.ndarray
        2-D input image.
    model : keras model
        Model whose `predict` returns one value per patch.
    neigh_size : int
        Patch window width used when the model was trained.

    Returns
    -------
    np.ndarray
        (w, h) map where w/h are x's dimensions shrunk by the neighborhood
        margin (neigh_size//2) on each side.

    Fixes vs. original: removed the duplicated ``m = neigh_size//2``
    assignment, the debug prints, and the generator variable that shadowed
    the parameter ``x``.
    """
    m = neigh_size // 2
    # Dense sliding-window patches (stride 1) over the whole image.
    xn = get_neighbors(x, neigh_size, step=1)
    # Each spatial dimension loses an m-pixel margin on both sides.
    *_, w, h = tuple(dim - 2 * m for dim in x.shape)
    yn = model.predict(xn.reshape(*xn.shape, 1))
    return yn.reshape(w, h)
n = np.random.randint(len(XYZ))
print(n)
res = predict_image(XYZ[n][0], model, neighSize)
62 (300, 300) 260 260 67600 (67600, 1)
m = neighSize//2
# Threshold the probability map at 0.1 to obtain a binary prediction mask.
x = res>.1
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
#axes[0].imshow(x)
axes[0].imshow(XYZ[n][0])
# Crop the ground-truth views by the same margin the prediction lost.
axes[1].imshow(XYZ[n][1][m:-m, m:-m])
#axes[3].imshow(XYZ[n][2][m:-m, m:-m])
# Intersect the prediction with the torso outline, then erode with a 2x2
# kernel to thin single-pixel responses.
mkkernel = lambda k: np.ones((k, k))
t = cv2.erode((XYZ[n][2][m:-m, m:-m])*x, mkkernel(2), 1)
axes[2].imshow(t)
<matplotlib.image.AxesImage at 0x793853cd0e10>
import datetime, re
d = re.sub(r':| |\.', '_', f"{datetime.datetime.now()}")
model2.save(f'best_so_far_neighbor_{neighSize}_{d}.model_v04')
ls *v03
best_so_far_neighbor_17_2020-04-13_17_23_17_464782.model_v03 best_so_far_neighbor_41_2020-04-14_14_40_34_896919.model_v03
from keras.models import load_model
model2 = load_model(f'best_so_far_neighbor_41_2020-04-14_14_40_34_896919.model_v03')
tf.test.is_gpu_available()
False
n = np.random.randint(len(XYZ))
print(n)
res = predict_image(XYZ[n][0], model2, neighSize)
55 (300, 300) 260 260 67600 (67600, 1)
res.shape
(260, 260)
XYZ[n][2].shape
(300, 300)
m = neighSize//2
x = res>.1
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
#axes[0].imshow(x)
axes[0].imshow(XYZ[n][0])
axes[1].imshow(XYZ[n][1][m:-m, m:-m])
#axes[3].imshow(nw_XYZ[n][2][m:-m, m:-m])
mkkernel = lambda k: np.ones((k, k))
t = cv2.erode((XYZ[n][2][m:-m, m:-m])*x, mkkernel(2), 1)
axes[2].imshow(t)
<matplotlib.image.AxesImage at 0x7938400a0a58>
#np.random.choice(A, replace=False, size=10)
indices = np.random.choice(range(len(_masked_images)), size=15)
indices
array([136, 183, 392, 209, 278, 75, 482, 458, 441, 86, 226, 379, 269,
124, 435])
mkdir 'masked'
mkdir: cannot create directory ‘masked’: File exists
for i in indices:
cv2.imwrite('./masked/masked_%03d.png'%i, (_masked_images[i]-1)*255)
masked_021.png masked_088.png masked_144.png masked_219.png masked_289.png masked_030.png masked_120.png masked_158.png masked_239.png masked_334.png masked_062.png masked_140.png masked_195.png masked_262.png masked_436.png
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import cv2
from dask.distributed import Client, LocalCluster
from dask import delayed, compute
from dask import bag, array as da
%load_ext autoreload
%autoreload 2
%pylab inline
/home/bzr0014/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:458: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/home/bzr0014/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:459: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/home/bzr0014/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:460: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/home/bzr0014/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:461: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/home/bzr0014/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:462: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/home/bzr0014/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:465: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
Populating the interactive namespace from numpy and matplotlib
# keras important imports:
# import the necessary packages
from keras.models import Sequential
from keras.models import load_model
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras import backend as K
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.regularizers import l2
import glob
files = glob.glob('masked/*png')
import re
k = 10
def mardas(x):
    """Extract flattened (2k x 2k) boolean windows around every non-zero
    pixel of the dilated blue channel of ``x``.

    NOTE(review): this definition is dead code — it is immediately shadowed
    by the ``mardas(args)`` redefinition in the next cell.

    Returns ``(dilated, indices, features, labels)`` where ``features`` is
    each window flattened and concatenated with its (row, col) coordinates.
    """
    # Take the blue channel (cv2 loads BGR) and resize with nearest-neighbor
    # to preserve integer label values.
    x = cv2.resize(x[:, :, 2], (300, 300), interpolation=cv2.INTER_NEAREST)
    y = cv2.dilate((x).astype('uint8'), np.ones((3,3)))
    indcs = np.where(y>0)
    t = np.array(indcs).T
    # NOTE(review): for pixels within k of the border, i-k goes negative and
    # the slice yields a wrong/short window — assumes content stays away from
    # the edges; TODO confirm.
    f = np.array([(y>0)[i-k:i+k, j-k:j+k].reshape(-1) for i,j in zip(*indcs)])
    return y, indcs, np.concatenate([f, t], axis=1), y[indcs]
def mardas(args):
    """Build per-pixel training features from a (mask image, raw image) pair.

    ``args`` is ``(x, y)`` where ``x`` is a color-coded mask (labels in the
    blue channel) and ``y`` is the matching raw image.

    Returns ``(z, indices, (mask_windows, intensity_windows, coords), labels)``
    with both window stacks reshaped to ``(n, 2k, 2k, 1)`` for a Conv2D input.
    """
    x, y = args
    # Collapse the raw image to grayscale.
    y = y.mean(axis=2)
    # Blue channel of the mask, nearest-neighbor so label values stay intact.
    x = cv2.resize(x[:, :, 2], (300, 300), interpolation=cv2.INTER_NEAREST)
    y = cv2.resize(y, (300, 300))
    z = cv2.dilate((x).astype('uint8'), np.ones((3,3)))
    indcs = np.where(z>0)
    # (row, col) coordinates of every labelled pixel.
    t = np.array(indcs).T
    # NOTE(review): windows are unguarded at the borders — i-k < 0 produces a
    # wrong/short slice; assumes labelled pixels are at least k from the edge.
    f = np.array([(z>0)[i-k:i+k, j-k:j+k] for i,j in zip(*indcs)])
    ff = np.array([y[i-k:i+k, j-k:j+k] for i,j in zip(*indcs)])
    f = f.reshape(*f.shape, 1)
    ff = ff.reshape(*ff.shape, 1)
    return z, indcs, (f, ff, t), z[indcs]
X_ = [_raw_images[int(re.findall(r'_(\d{3})\.', x)[0])] for x in files]
Xy = list(map(mardas, zip(map(cv2.imread, files), X_)))
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
encoder = OneHotEncoder()
scaler = MinMaxScaler()
#X = np.concatenate([x[2] for x in Xy])
#X = scaler.fit_transform(X)
X1 = np.concatenate([x[2][0] for x in Xy])
mu1, sig1 = X1.mean(), X1.std()
X1 = (X1-mu1)/sig1
X2 = np.concatenate([x[2][1] for x in Xy])
mu2, sig2 = X2.mean(), X2.std()
X2 = (X2-mu2)/sig2
X3 = np.concatenate([x[2][2] for x in Xy])
X3 = scaler.fit_transform(X3)
y = np.concatenate([x[3] for x in Xy]).reshape(-1 ,1)
y = encoder.fit_transform(y).toarray()
X1.shape, X2.shape, X3.shape, y.shape
((229227, 20, 20, 1), (229227, 20, 20, 1), (229227, 2), (229227, 11))
from sklearn.model_selection import train_test_split
rnd = np.random.rand(len(X1))
X1tr = X1[rnd>.1]
X1ts = X1[rnd<.1]
X2tr = X2[rnd>.1]
X2ts = X2[rnd<.1]
X3tr = X3[rnd>.1]
X3ts = X3[rnd<.1]
ytr = y[rnd>.1]
yts = y[rnd<.1]
def baseline_model():
    """Create and compile the dense baseline classifier.

    Input: a flattened (2k x 2k) neighborhood plus 2 coordinate features;
    output: 11-way sigmoid scores, trained with categorical cross-entropy.
    """
    net = Sequential()
    for layer in (
        Dense(20, input_dim=(k * 2) ** 2 + 2, activation='relu'),
        Dense(11, activation='sigmoid'),
    ):
        net.add(layer)
    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    return net
model = baseline_model()
import keras
input1 = keras.layers.Input(shape=(2*k,2*k,1))
x1 = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(input1)
x1 = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x1)
x1 = keras.layers.Flatten()(x1)
x1 = keras.layers.Dense(11, activation='relu')(x1)
input2 = keras.layers.Input(shape=(2*k,2*k,1))
x2 = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(input2)
x2 = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x2)
x2 = keras.layers.Flatten()(x2)
x2 = keras.layers.Dense(11, activation='relu')(x2)
input3 = keras.layers.Input(shape=(2,))
x3 = keras.layers.Dense(11, activation='relu')(input3)
#x2 = keras.layers.Dense(11, activation='relu')(x2)
# equivalent to added = keras.layers.add([x1, x2])
added = keras.layers.Add()([x1, x2, x3])
added = keras.layers.Dense(11, activation='relu')(added)
out = keras.layers.Dense(11, activation='sigmoid')(added)
model = keras.models.Model(inputs=[input1, input2, input3], outputs=out)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit([X1tr, X2tr, X3tr], ytr, batch_size=800, epochs=100, validation_data=([X1ts, X2ts, X3ts], yts))
nn = np.random.randint(len(Xy))
print(nn)
_1, _2, _3 = Xy[nn][:3]
13
_4, _5, _6 = (_3[0]-mu1)/sig1, (_3[1]-mu2)/sig2, scaler.transform(_3[2])
_ = model.predict([_4, _5, _6])
_ = _ / _.sum(axis=1, keepdims=1)
_ = encoder.inverse_transform(_>.1)
imt = np.zeros_like(_1)
#imt[_2] = _.reshape(-1)
imt[_2] = _.reshape(-1)
a = np.zeros_like(imt)
for i in np.where(np.bincount(imt.reshape(-1)))[0][1:]:
tmp = cv2.erode((imt == i).astype('uint8'), np.ones((2, 2)))
_, cnts, _ = cv2.findContours(tmp.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=lambda x: -len(x))[:1]
tmp = np.zeros_like(tmp)
a += cv2.drawContours(tmp, cnts, -1, int(i), -1)
#a += cv2.dilate(tmp, np.ones((4, 4)))*i
plt.imshow(a)
<matplotlib.image.AxesImage at 0x793831965e48>
def mardas(x, k=10):
    """Zero-pad ``x`` with a ``k``-wide border, then extract the (2k x 2k)
    boolean window centred on every non-zero pixel.

    The padding guarantees every window slice stays in bounds, unlike the
    earlier unpadded variants.

    Parameters
    ----------
    x : 2-D ndarray — label/mask image.
    k : int, default 10 — window half-size (default matches the notebook's
        module-level ``k``; now a parameter so the function is self-contained).

    Returns
    -------
    (padded, indices, [windows, coords], values) where ``windows`` has shape
    ``(n, 2k, 2k, 1)`` and ``values`` are the padded-image values at the
    non-zero positions.
    """
    padded = np.zeros(tuple(dim + 2 * k for dim in x.shape))
    padded[k:-k, k:-k] = x
    indcs = np.where(padded > 0)
    # (row, col) coordinates in the padded frame.
    coords = np.array(indcs).T
    windows = np.array([(padded > 0)[i - k:i + k, j - k:j + k]
                        for i, j in zip(*indcs)])
    # Trailing channel axis for Conv2D input.
    windows = windows.reshape(*windows.shape, 1)
    return padded, indcs, [windows, coords], padded[indcs]
def get_classes(x):
    """Predict a per-pixel muscle-class map for mask ``x``.

    NOTE(review): this function is broken as written and matches the
    ValueError traceback below:
      * ``mu`` and ``std`` are undefined — the normalizers computed earlier
        are ``mu1``/``sig1`` and ``mu2``/``sig2``.
      * the model built above takes THREE inputs ([X1, X2, X3]); only two
        are passed here, and ``mardas`` (padded variant) returns only
        ``[windows, coords]`` — there is no intensity-window input.
    A fix needs a coordinated change to ``mardas``/the model inputs.
    """
    _1, _2, (_31, _32), *_ = mardas(x)
    _4 = model.predict([(_31-mu)/std, scaler.transform(_32)])
    imt = np.zeros_like(x)
    # Normalize scores to sum to 1 per pixel, threshold at 0.1, then map the
    # one-hot rows back to integer class labels.
    yprd = encoder.inverse_transform(((_4 / _4.sum(axis=1, keepdims=True)) > .1)*1)
    imt[_2] = yprd.reshape(-1)
    return imt
plt.imshow(get_classes(_1))
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-357-d9e280a24025> in <module> 14 imt[_2] = yprd.reshape(-1) 15 return imt ---> 16 plt.imshow(get_classes(_1)) <ipython-input-357-d9e280a24025> in get_classes(x) 9 def get_classes(x): 10 _1, _2, _3, *_ = mardas(x) ---> 11 _4 = model.predict(scaler.transform(_3)) 12 imt = np.zeros_like(x) 13 yprd = encoder.inverse_transform(((_4 / _4.sum(axis=1, keepdims=True)) > .1)*1) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/sklearn/preprocessing/_data.py in transform(self, X) 412 force_all_finite="allow-nan") 413 --> 414 X *= self.scale_ 415 X += self.min_ 416 return X ValueError: operands could not be broadcast together with shapes (19343,198) (2,) (19343,198)
plt.imshow(get_classes((res>.1) & XYZ[55][2][m:-m, m:-m]))
<matplotlib.image.AxesImage at 0x79383dc35470>
plt.imshow(cv2.dilate((_1>0).astype('uint8'), np.ones((3,3))))
<matplotlib.image.AxesImage at 0x79383ec710b8>
sss.max()
1
N = 10
l = int(np.log(N)/np.log(2))+1
2**(l+4) / N
25.6
t = np.arange(0, 2**(l+4), 25)
["{0:08b}".format(x) for x in t]
['00000000', '00011001', '00110010', '01001011', '01100100', '01111101', '10010110', '10101111', '11001000', '11100001', '11111010']
np.abs(t.reshape(1, -1) - np.random.randint(256, size=50).reshape(-1, 1)).argmin(axis=1)
array([ 2, 7, 6, 9, 7, 0, 5, 2, 2, 1, 8, 0, 8, 5, 3, 9, 8,
7, 7, 8, 3, 2, 8, 9, 6, 4, 9, 5, 6, 0, 6, 9, 1, 1,
1, 1, 2, 5, 0, 7, 9, 10, 4, 5, 8, 1, 6, 2, 10, 4])
a = np.random.rand(10, 3)
(1+a.dot(a.T**.2))#.sum(axis=0)
array([[2.56706585, 2.33048033, 2.57231358, 2.32754517, 2.39959548,
2.46850553, 2.60619466, 2.59029428, 2.67219455, 2.50950931],
[1.8461522 , 1.78965218, 1.81566946, 1.70656476, 1.82201851,
1.77256526, 1.88060444, 1.90706822, 1.93438748, 1.87899436],
[2.64719657, 2.34152327, 2.67964964, 2.41197942, 2.43244496,
2.57600137, 2.67647735, 2.63300826, 2.74030125, 2.52457813],
[1.71807919, 1.59335194, 1.72852871, 1.62273164, 1.6468314 ,
1.70085784, 1.73123753, 1.71777445, 1.77027547, 1.66119075],
[2.12460112, 2.04864037, 2.08534598, 1.9639864 , 2.13420411,
2.07488206, 2.16669322, 2.20523974, 2.2645669 , 2.13484746],
[2.30599632, 2.05685682, 2.33585305, 2.14819724, 2.17693592,
2.30499027, 2.32398922, 2.29043869, 2.40263073, 2.16620148],
[2.78404888, 2.54116125, 2.7774552 , 2.50184767, 2.60822343,
2.65326233, 2.83435754, 2.82839538, 2.91010587, 2.74977171],
[2.80534647, 2.62472982, 2.76827879, 2.51271159, 2.69203319,
2.6588386 , 2.8681132 , 2.8944954 , 2.96385161, 2.8260797 ],
[3.25670011, 2.9838767 , 3.23330169, 2.92483324, 3.117833 ,
3.13943235, 3.32248598, 3.33648274, 3.45933851, 3.20465454],
[2.57840033, 2.43734777, 2.53707565, 2.29010902, 2.44356081,
2.38755309, 2.64071151, 2.66746727, 2.696476 , 2.65116679]])
(1+a.dot(a[0].T**.2))#.sum()
array([2.56706585, 1.8461522 , 2.64719657, 1.71807919, 2.12460112,
2.30599632, 2.78404888, 2.80534647, 3.25670011, 2.57840033])
import datetime, re
d = re.sub(r':| |\.', '_', f"{datetime.datetime.now()}")
model.save(f'best_classification_{neighSize}_{d}.model_v03')
#ress = list(map(lambda x: predict_image(x[0], model2, neighSize), XYZ))
(300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 
67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) 
(300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 
67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) 
(300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 
67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1) (300, 300) 260 260 67600 (67600, 1)
XYZT = [(x,y,z,t) for (x,y,z), t in zip(XYZ, ress)]
len(XYZT[0])
4
rs = list(map(lambda x: x[2][m:-m, m:-m] * x[3], XYZT))
for x, y, z, t in XYZT:
fig, axes = plt.subplots(1, 3)
axes[0].imshow(x)
axes[1].imshow(z[m:-m, m:-m] * t)
axes[2].imshow(y)
plt.show()
--------------------------------------------------------------------------- KeyboardInterrupt Traceback (most recent call last) <ipython-input-614-6ba8ef6d773c> in <module> 4 axes[1].imshow(z[m:-m, m:-m] * t) 5 axes[2].imshow(y) ----> 6 plt.show() ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/pyplot.py in show(*args, **kw) 267 """ 268 global _show --> 269 return _show(*args, **kw) 270 271 ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/ipykernel/pylab/backend_inline.py in show(close, block) 41 display( 42 figure_manager.canvas.figure, ---> 43 metadata=_fetch_figure_metadata(figure_manager.canvas.figure) 44 ) 45 finally: ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/IPython/core/display.py in display(include, exclude, metadata, transient, display_id, *objs, **kwargs) 311 publish_display_data(data=obj, metadata=metadata, **kwargs) 312 else: --> 313 format_dict, md_dict = format(obj, include=include, exclude=exclude) 314 if not format_dict: 315 # nothing to display (e.g. 
_ipython_display_ took over) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/IPython/core/formatters.py in format(self, obj, include, exclude) 178 md = None 179 try: --> 180 data = formatter(obj) 181 except: 182 # FIXME: log the exception <decorator-gen-9> in __call__(self, obj) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/IPython/core/formatters.py in catch_format_error(method, self, *args, **kwargs) 222 """show traceback on failed format call""" 223 try: --> 224 r = method(self, *args, **kwargs) 225 except NotImplementedError: 226 # don't warn on NotImplementedErrors ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj) 339 pass 340 else: --> 341 return printer(obj) 342 # Finally look for special method names 343 method = get_real_method(obj, self.print_method) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/IPython/core/pylabtools.py in <lambda>(fig) 246 247 if 'png' in formats: --> 248 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs)) 249 if 'retina' in formats or 'png2x' in formats: 250 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs)) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs) 130 FigureCanvasBase(fig) 131 --> 132 fig.canvas.print_figure(bytes_io, **kw) 133 data = bytes_io.getvalue() 134 if fmt == 'svg': ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs) 2089 orientation=orientation, 2090 bbox_inches_restore=_bbox_inches_restore, -> 2091 **kwargs) 2092 finally: 2093 if bbox_inches and restore_bbox: ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args, **kwargs) 525 526 else: --> 527 FigureCanvasAgg.draw(self) 
528 renderer = self.get_renderer() 529 with cbook._setattr_cm(renderer, dpi=self.figure.dpi), \ ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/backends/backend_agg.py in draw(self) 386 self.renderer = self.get_renderer(cleared=True) 387 with RendererAgg.lock: --> 388 self.figure.draw(self.renderer) 389 # A GUI class may be need to update a window using this draw, so 390 # don't forget to call the superclass. ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/artist.py in draw_wrapper(artist, renderer, *args, **kwargs) 36 renderer.start_filter() 37 ---> 38 return draw(artist, renderer, *args, **kwargs) 39 finally: 40 if artist.get_agg_filter() is not None: ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/figure.py in draw(self, renderer) 1707 self.patch.draw(renderer) 1708 mimage._draw_list_compositing_images( -> 1709 renderer, self, artists, self.suppressComposite) 1710 1711 renderer.close_group('figure') ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/image.py in _draw_list_compositing_images(renderer, parent, artists, suppress_composite) 133 if not_composite or not has_images: 134 for a in artists: --> 135 a.draw(renderer) 136 else: 137 # Composite any adjacent images together ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/artist.py in draw_wrapper(artist, renderer, *args, **kwargs) 36 renderer.start_filter() 37 ---> 38 return draw(artist, renderer, *args, **kwargs) 39 finally: 40 if artist.get_agg_filter() is not None: ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/axes/_base.py in draw(self, renderer, inframe) 2645 renderer.stop_rasterizing() 2646 -> 2647 mimage._draw_list_compositing_images(renderer, self, artists) 2648 2649 renderer.close_group('axes') ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/image.py in _draw_list_compositing_images(renderer, parent, artists, suppress_composite) 133 if not_composite or not has_images: 134 
for a in artists: --> 135 a.draw(renderer) 136 else: 137 # Composite any adjacent images together ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/artist.py in draw_wrapper(artist, renderer, *args, **kwargs) 36 renderer.start_filter() 37 ---> 38 return draw(artist, renderer, *args, **kwargs) 39 finally: 40 if artist.get_agg_filter() is not None: ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/axis.py in draw(self, renderer, *args, **kwargs) 1213 # the actual bbox 1214 -> 1215 self._update_label_position(renderer) 1216 1217 self.label.draw(renderer) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/axis.py in _update_label_position(self, renderer) 2012 # get bounding boxes for this axis and any siblings 2013 # that have been set by `fig.align_xlabels()` -> 2014 bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer) 2015 2016 x, y = self.label.get_position() ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/axis.py in _get_tick_boxes_siblings(self, renderer) 1996 # if we want to align labels from other axes: 1997 for nn, axx in enumerate(grp.get_siblings(self.axes)): -> 1998 ticks_to_draw = axx.xaxis._update_ticks() 1999 tlb, tlb2 = axx.xaxis._get_tick_bboxes(ticks_to_draw, renderer) 2000 bboxes.extend(tlb) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/axis.py in _update_ticks(self) 1078 """ 1079 major_locs = self.get_majorticklocs() -> 1080 major_labels = self.major.formatter.format_ticks(major_locs) 1081 major_ticks = self.get_major_ticks(len(major_locs)) 1082 self.major.formatter.set_locs(major_locs) ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/ticker.py in format_ticks(self, values) 257 def format_ticks(self, values): 258 """Return the tick labels for all the ticks at once.""" --> 259 self.set_locs(values) 260 return [self(value, i) for i, value in enumerate(values)] 261 
~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/ticker.py in set_locs(self, locs) 687 self._compute_offset() 688 self._set_order_of_magnitude() --> 689 self._set_format() 690 691 def _compute_offset(self): ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/matplotlib/ticker.py in _set_format(self) 789 thresh = 1e-3 * 10 ** loc_range_oom 790 while sigfigs >= 0: --> 791 if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh: 792 sigfigs -= 1 793 else: ~/anaconda3/envs/tensorGpu2/lib/python3.6/site-packages/numpy/core/_methods.py in _amax(a, axis, out, keepdims, initial, where) 28 def _amax(a, axis=None, out=None, keepdims=False, 29 initial=_NoValue, where=True): ---> 30 return umr_maximum(a, axis, None, out, keepdims, initial, where) 31 32 def _amin(a, axis=None, out=None, keepdims=False, KeyboardInterrupt:
def raghas(args):
    """Build prediction-time features from a (binary mask, grayscale) pair.

    ``args`` is ``(x, y)``: ``x`` a binary mask, ``y`` the matching image.
    Erode-then-dilate (a morphological opening) removes speckle noise before
    window extraction.

    Returns ``(z, indices, (mask_windows, intensity_windows, coords), values)``
    with window stacks shaped ``(n, 2k, 2k, 1)``.
    """
    x, y = args
    z = cv2.erode((x).astype('uint8'), np.ones((3,3)))
    z = cv2.dilate((z).astype('uint8'), np.ones((3,3)))
    indcs = np.where(z>0)
    t = np.array(indcs).T
    # NOTE(review): unguarded border slices — i-k < 0 silently yields a
    # wrong/short window; assumes mask content is at least k from the edge.
    f = np.array([(z>0)[i-k:i+k, j-k:j+k] for i,j in zip(*indcs)])
    ff = np.array([y[i-k:i+k, j-k:j+k] for i,j in zip(*indcs)])
    f = f.reshape(*f.shape, 1)
    ff = ff.reshape(*ff.shape, 1)
    return z, indcs, (f, ff, t), z[indcs]
_ = XYZT[15]
_1 = ((_[2][m:-m,m:-m] * _[3])>.1).astype('uint8')
_1, _2, _3, *_ = raghas((_1, _[0]))
_4, _5, _6 = (_3[0]-mu1)/sig1, (_3[1]-mu2)/sig2, scaler.transform(_3[2])
_ = model.predict([_4, _5, _6])
_ = _ / _.sum(axis=1, keepdims=1)
_ = encoder.inverse_transform(_>.1)
imt = np.zeros_like(_1)
#imt[_2] = _.reshape(-1)
imt[_2] = _.reshape(-1)
plt.imshow(imt)
<matplotlib.image.AxesImage at 0x7936f03547f0>
# For every label present in imt, keep only its single largest contour
# (filled), discarding smaller disconnected fragments of the same label.
a = np.zeros_like(imt)
for i in np.where(np.bincount(imt.reshape(-1)))[0][1:]:
# Light erosion to detach regions that barely touch.
tmp = cv2.erode((imt == i).astype('uint8'), np.ones((2, 2)))
# NOTE(review): the 3-value unpack matches the OpenCV 3.x findContours
# API; OpenCV 4.x returns (contours, hierarchy) and would break here.
_, cnts, _ = cv2.findContours(tmp.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# Keep only the contour with the most points.
cnts = sorted(cnts, key=lambda x: -len(x))[:1]
tmp = np.zeros_like(tmp)
# Draw the surviving contour filled (thickness -1) with the label value.
a += cv2.drawContours(tmp, cnts, -1, int(i), -1)
#a += cv2.dilate(tmp, np.ones((4, 4)))*i
plt.imshow(a)
<matplotlib.image.AxesImage at 0x7936aa223b70>
_2
(array([ 39, 39, 39, ..., 215, 215, 215]), array([138, 139, 140, ..., 150, 151, 152]))
# Re-run of the standardise / repaint cell above.
_4, _5, _6 = (_3[0]-mu1)/sig1, (_3[1]-mu2)/sig2, scaler.transform(_3[2])
imt = np.zeros_like(_1)
#imt[_2] = _.reshape(-1)
# NOTE(review): this raised AttributeError in the recorded run -- by this
# point `_` had been rebound to a plain Python list (no .reshape). Use
# np.asarray(_).reshape(-1), or better, a named variable instead of `_`.
imt[_2] = _.reshape(-1)
--------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-641-cda48167d19c> in <module> 1 imt = np.zeros_like(_1) 2 #imt[_2] = _.reshape(-1) ----> 3 imt[_2] = _.reshape(-1) AttributeError: 'list' object has no attribute 'reshape'
# Duplicate of the largest-contour cleanup loop earlier in this file.
a = np.zeros_like(imt)
for i in np.where(np.bincount(imt.reshape(-1)))[0][1:]:
tmp = cv2.erode((imt == i).astype('uint8'), np.ones((2, 2)))
# NOTE(review): OpenCV 3.x 3-tuple return; breaks under OpenCV 4.x.
_, cnts, _ = cv2.findContours(tmp.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# Largest contour only.
cnts = sorted(cnts, key=lambda x: -len(x))[:1]
tmp = np.zeros_like(tmp)
a += cv2.drawContours(tmp, cnts, -1, int(i), -1)
#a += cv2.dilate(tmp, np.ones((4, 4)))*i
plt.imshow(a)
<matplotlib.image.AxesImage at 0x793831965e48>
from multiprocessing import Array, Queue, Process, Pool
def mardas(args):
    """Infinite counter generator: yields 0, 1, 2, ...

    The *args* parameter is accepted (the parallel runner forwards one)
    but never used by the generator itself.
    """
    n = 0
    while True:
        yield n
        n += 1
def gen_runner(gen, args, out_q=None, stop_q=None):
    """Worker loop: drain the generator ``gen(args)`` into a queue.

    Runs until the stop queue is non-empty (any item means "stop").

    Parameters
    ----------
    gen : callable that returns a generator when called with *args*.
    args : single argument forwarded to *gen*.
    out_q : queue receiving produced items. Defaults to the module-level
        global ``q`` so existing ``Process(target=gen_runner,
        args=(gen, args))`` callers keep working unchanged.
    stop_q : queue acting as a stop flag. Defaults to the module-level
        global ``l`` for the same backward-compatibility reason.
    """
    # multiprocessing.Queue.put_nowait raises queue.Full when the queue
    # is at capacity -- catch exactly that instead of a bare except,
    # which previously also swallowed KeyboardInterrupt etc.
    from queue import Full

    out_q = q if out_q is None else out_q
    stop_q = l if stop_q is None else stop_q
    items = gen(args)
    print(items)
    while stop_q.empty():
        item = next(items)
        while True:
            try:
                out_q.put_nowait(item)
                break
            except Full:
                # Output queue full: retry, but bail out immediately if a
                # stop signal arrived in the meantime (otherwise this
                # busy-waits, exactly as the original did).
                if not stop_q.empty():
                    return
def run_gen_in_par(gen, args, q, l, n_procs=4):
    """Start *n_procs* worker processes all draining ``gen(args)`` into *q*.

    Fix: *q* and *l* are now actually forwarded to the workers. Previously
    they were accepted but ignored, and the workers only worked because
    they read the fork-inherited module globals of the same names.

    Returns the list of started ``Process`` objects; callers join them
    after putting a stop item into *l*.
    """
    ps = [Process(target=gen_runner, args=(gen, args, q, l))
          for _ in range(n_procs)]
    for p in ps:
        p.start()
    return ps
# Bounded output queue (back-pressure at 10 items) plus an unbounded
# stop-signal queue, shared with the worker processes.
q = Queue(maxsize=10)
l = Queue()
ps = run_gen_in_par(mardas, (3,), q, l)
<generator object mardas at 0x7ac7bcf69a40> <generator object mardas at 0x7ac7bcf69a40> <generator object mardas at 0x7ac7bcf69a40> <generator object mardas at 0x7ac7bcf69a40>
# Drain whatever the workers have produced so far; interleaving across
# the producer processes is nondeterministic.
while not q.empty():
print(q.get())
28 24 22 15 29 25 16 30 17 31 18 23
# Any item in l signals the workers to stop; then reap them.
l.put(1)
for p in ps:
p.join()
# Reset the notebook namespace (keeping names that end with '_') and load
# the inputs for the next analysis stage.
%reset_selective -f .*(?<!_)$
from custom_imports import *
IXYZ = joblib.load('IXYZ.pkl')
results = joblib.load('11_muscle_predictions.pkl')
# Sanity check: the two pickles are expected to line up, but the recorded
# output shows 321 vs 167 -- NOTE(review): confirm whether results is a
# filtered subset of IXYZ.
len(IXYZ), len(results)
(321, 167)